bitkeeper revision 1.1159.69.20 (413c7829P-RU1PkUqzwGG3rQ_fI5Qg)
author cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Mon, 6 Sep 2004 14:46:01 +0000 (14:46 +0000)
committer cl349@freefall.cl.cam.ac.uk <cl349@freefall.cl.cam.ac.uk>
Mon, 6 Sep 2004 14:46:01 +0000 (14:46 +0000)
Manage L1 pagetables in a kmem_cache, which allows us to keep the
pages pinned while they are not used.

linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/init.c
linux-2.6.8.1-xen-sparse/arch/xen/i386/mm/pgtable.c
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgalloc.h
linux-2.6.8.1-xen-sparse/include/asm-xen/asm-i386/pgtable.h

index 7886cd64c7c9394c73c21d63c638528b6a16835c..2e80b31c7698717aa526748cfa6d52794d24066c 100644 (file)
@@ -726,9 +726,18 @@ void __init mem_init(void)
 
 kmem_cache_t *pgd_cache;
 kmem_cache_t *pmd_cache;
+kmem_cache_t *pte_cache;
 
 void __init pgtable_cache_init(void)
 {
+       pte_cache = kmem_cache_create("pte",
+                               PTRS_PER_PTE*sizeof(pte_t),
+                               PTRS_PER_PTE*sizeof(pte_t),
+                               0,
+                               pte_ctor,
+                               pte_dtor);
+       if (!pte_cache)
+               panic("pgtable_cache_init(): Cannot create pte cache");
        if (PTRS_PER_PMD > 1) {
                pmd_cache = kmem_cache_create("pmd",
                                        PTRS_PER_PMD*sizeof(pmd_t),
index b6182d9961fd236a6c536bae8910bcad283272bc..bebd04f92dfb4bd3d28f93890ae68c79faa0d7aa 100644 (file)
@@ -185,32 +185,44 @@ pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
        return pte;
 }
 
+void pte_ctor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+
+       clear_page(pte);
+       __make_page_readonly(pte);
+       queue_pte_pin(virt_to_phys(pte));
+       flush_page_update_queue();
+}
+
+void pte_dtor(void *pte, kmem_cache_t *cache, unsigned long unused)
+{
+
+       queue_pte_unpin(virt_to_phys(pte));
+       __make_page_writable(pte);
+       flush_page_update_queue();
+}
+
 struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
 {
-       struct page *pte;
+       pte_t *ptep;
 
 #ifdef CONFIG_HIGHPTE
+       struct page *pte;
+
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
-#else
-       pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
-#endif
-       if (pte) {
-#ifdef CONFIG_HIGHPTE
-               void *kaddr = kmap_atomic(pte, KM_USER0);
-               clear_page(kaddr);
-               kunmap_atomic_force(kaddr, KM_USER0);
-#else
+       if (pte == NULL)
+               return pte;
+       if (pte >= highmem_start_page) {
                clear_highpage(pte);
-#endif
-#ifdef CONFIG_HIGHPTE
-               if (pte < highmem_start_page)
-#endif
-               {
-                       __make_page_readonly(phys_to_virt(page_to_pseudophys(pte)));
-                       flush_page_update_queue();
-               }
+               return pte;
        }
-       return pte;
+       /* not a highmem page -- free page and grab one from the cache */
+       __free_page(pte);
+#endif
+       ptep = kmem_cache_alloc(pte_cache, GFP_KERNEL);
+       if (ptep)
+               return virt_to_page(ptep);
+       return NULL;
 }
 
 void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
index 0ba1e5a542138cdf42bd589944bd5b94a9bddc0d..3d36223ecad25e0426b66e6f33b986cfbfedc440 100644 (file)
@@ -41,19 +41,16 @@ static inline void pte_free(struct page *pte)
 #ifdef CONFIG_HIGHPTE
        if (pte < highmem_start_page)
 #endif
-       {
-               __make_page_writable(phys_to_virt(page_to_pseudophys(pte)));
+               kmem_cache_free(pte_cache,
+                               phys_to_virt(page_to_pseudophys(pte)));
+#ifdef CONFIG_HIGHPTE
+       else
                __free_page(pte);
-               flush_page_update_queue();
-       }
+#endif
 }
 
 
-#define __pte_free_tlb(tlb,pte) do {                   \
-       tlb_remove_page((tlb),(pte));                   \
-       flush_page_update_queue();                      \
-       /* XXXcl queue */ \
-} while (0)
+#define __pte_free_tlb(tlb,pte)                pte_free(pte)
 
 /*
  * allocating and freeing a pmd is trivial: the 1-entry pmd is
index a84b1a23089c6dd4d691d7428ab898f932717eee..20dda455dc00789916de81b8a8bed24e015911eb 100644 (file)
@@ -35,9 +35,12 @@ extern unsigned long empty_zero_page[1024];
 extern pgd_t swapper_pg_dir[1024];
 extern kmem_cache_t *pgd_cache;
 extern kmem_cache_t *pmd_cache;
+extern kmem_cache_t *pte_cache;
 extern spinlock_t pgd_lock;
 extern struct page *pgd_list;
 
+void pte_ctor(void *, kmem_cache_t *, unsigned long);
+void pte_dtor(void *, kmem_cache_t *, unsigned long);
 void pmd_ctor(void *, kmem_cache_t *, unsigned long);
 void pgd_ctor(void *, kmem_cache_t *, unsigned long);
 void pgd_dtor(void *, kmem_cache_t *, unsigned long);
@@ -315,9 +318,7 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
 
 #define pmd_clear(xp)  do {                                    \
-       pmd_t p = *(xp);                                        \
        set_pmd(xp, __pmd(0));                                  \
-       __make_page_writable((void *)pmd_page_kernel(p));       \
        xen_flush_page_update_queue();                          \
 } while (0)